{
unsigned long d0, d1, res;
- __asm__ __volatile__ (
+ asm volatile (
" xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */
" repe; scas"__OS"\n\t"
" je 1f\n\t"
if ( bit != 0 )
{
/* Look for a bit in the first word. */
- __asm__ ( "bsf %1,%%"__OP"ax"
- : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
+ asm ( "bsf %1,%%"__OP"ax"
+ : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
if ( set < (BITS_PER_LONG - bit) )
return (offset + set);
offset += BITS_PER_LONG - bit;
{
unsigned long d0, d1, d2, res;
- __asm__ (
+ asm volatile (
" xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */
" repe; scas"__OS"\n\t"
" je 1f\n\t"
if ( bit != 0 )
{
/* Look for zero in the first word. */
- __asm__ ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
+ asm ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
if ( set < (BITS_PER_LONG - bit) )
return (offset + set);
offset += BITS_PER_LONG - bit;
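For reference, the BSF step in the hunks above finds the lowest set bit of a non-zero word; a plain-C sketch of that operation (illustrative only, not code from this patch):

/* Index of the least significant set bit; BSF leaves its destination
 * undefined for a zero operand, so assume val != 0 here. */
static inline unsigned int bsf_sketch(unsigned long val)
{
    unsigned int idx = 0;
    while ( !(val & 1UL) )
    {
        val >>= 1;
        idx++;
    }
    return idx;
}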
*(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
*(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(current);
- __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
+ asm volatile ( "lgdt %0" : "=m" (gdt_load) );
/* No nested task. */
- __asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");
+ asm volatile ("pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
/* Ensure FPU gets initialised for each domain. */
stts();
#endif
set_tss_desc(cpu,t);
load_TR(cpu);
- __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
+ asm volatile ( "lldt %%ax" : : "a" (0) );
/* Clear all 6 debug registers: */
-#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
+#define CD(register) asm volatile ( "mov %0,%%db" #register : : "r" (0UL) );
CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD
#define loadsegment(seg,value) ({ \
int __r = 1; \
- __asm__ __volatile__ ( \
+ asm volatile ( \
"1: movl %k1,%%" #seg "\n2:\n" \
".section .fixup,\"ax\"\n" \
"3: xorl %k0,%k0\n" \
/* If in kernel mode then switch the GS bases around. */
if ( (n->arch.flags & TF_kernel_mode) )
- __asm__ __volatile__ ( "swapgs" );
+ asm volatile ( "swapgs" );
}
if ( unlikely(!all_segs_okay) )
}
#define loaddebug(_v,_reg) \
- __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+ asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
static void __context_switch(void)
{
char gdt_load[10];
*(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
*(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(n);
- __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
+ asm volatile ( "lgdt %0" : "=m" (gdt_load) );
}
if ( p->domain != n->domain )
{
u32 quotient, remainder;
ASSERT(dividend < divisor);
- __asm__ (
+ asm (
"divl %4"
: "=a" (quotient), "=d" (remainder)
: "0" (0), "1" (dividend), "r" (divisor) );
static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
{
u32 product_int, product_frac;
- __asm__ (
+ asm (
"mul %3"
: "=a" (product_frac), "=d" (product_int)
: "0" (multiplicand), "r" (multiplier) );
delta <<= scale->shift;
#ifdef CONFIG_X86_32
- __asm__ (
+ asm (
"mul %5 ; "
"mov %4,%%eax ; "
"mov %%edx,%4 ; "
: "=A" (product), "=r" (tmp1), "=r" (tmp2)
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
#else
- __asm__ (
+ asm (
"mul %%rdx ; shrd $32,%%rdx,%%rax"
: "=a" (product) : "0" (delta), "d" ((u64)scale->mul_frac) );
#endif
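Both the 32-bit and 64-bit branches compute the same thing: a 64x32-bit multiply that keeps bits [95:32] of the 96-bit product. A plain-C sketch of just that multiply step (illustrative only):

#include <stdint.h>

/* (delta * mul_frac) >> 32, truncated to 64 bits -- the mul/shrd idiom above. */
static inline uint64_t scale_mul_sketch(uint64_t delta, uint32_t mul_frac)
{
    uint64_t lo = (uint32_t)delta * (uint64_t)mul_frac;   /* low half  x frac */
    uint64_t hi = (delta >> 32)   * (uint64_t)mul_frac;   /* high half x frac */
    return hi + (lo >> 32);
}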
return 0;
eip += sizeof(instr);
- __asm__ (
+ asm (
"cpuid"
: "=a" (a), "=b" (b), "=c" (c), "=d" (d)
: "0" (a), "1" (b), "2" (c), "3" (d) );
*ar = desc.b & 0x00f0ff00;
if ( !(desc.b & _SEGMENT_L) )
{
- *base = (desc.a >> 16) + ((desc.b & 0xff) << 16) + (desc.b & 0xff000000);
+ *base = ((desc.a >> 16) + ((desc.b & 0xff) << 16) +
+ (desc.b & 0xff000000));
*limit = (desc.a & 0xffff) | (desc.b & 0x000f0000);
if ( desc.b & _SEGMENT_G )
*limit = ((*limit + 1) << 12) - 1;
#ifndef NDEBUG
- if ( !vm86_mode(regs) && sel > 3 )
+ if ( !vm86_mode(regs) && (sel > 3) )
{
unsigned int a, l;
unsigned char valid;
- __asm__("larl %2, %0\n\tsetz %1" : "=r" (a), "=rm" (valid) : "rm" (sel));
- BUG_ON(valid && (a & 0x00f0ff00) != *ar);
- __asm__("lsll %2, %0\n\tsetz %1" : "=r" (l), "=rm" (valid) : "rm" (sel));
- BUG_ON(valid && l != *limit);
+ asm volatile (
+ "larl %2,%0 ; setz %1"
+ : "=r" (a), "=rm" (valid) : "rm" (sel));
+ BUG_ON(valid && ((a & 0x00f0ff00) != *ar));
+ asm volatile (
+ "lsll %2,%0 ; setz %1"
+ : "=r" (l), "=rm" (valid) : "rm" (sel));
+ BUG_ON(valid && (l != *limit));
}
#endif
}
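For context, the non-long-mode path above assembles the segment base and limit from the two descriptor words; a stand-alone sketch of that decode (assuming the standard descriptor layout, with desc.a the low dword, desc.b the high dword and G at bit 23 of desc.b):

#include <stdint.h>

static void decode_base_limit_sketch(uint32_t a, uint32_t b,
                                     unsigned long *base, unsigned long *limit)
{
    *base  = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
    *limit = (a & 0xffff) | (b & 0x000f0000);
    if ( b & (1u << 23) )                       /* G: 4kB granularity */
        *limit = ((*limit + 1) << 12) - 1;      /* e.g. 0xfffff -> 0xffffffff */
}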
unsigned long condition;
struct vcpu *v = current;
- __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
+ asm volatile ( "mov %%db6,%0" : "=r" (condition) );
/* Mask out spurious debug traps due to lazy DR7 setting */
if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
(v->arch.guest_context.debugreg[7] == 0) )
{
- __asm__("mov %0,%%db7" : : "r" (0UL));
+ asm volatile ( "mov %0,%%db7" : : "r" (0UL) );
goto out;
}
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( p == current )
- __asm__ ( "mov %0, %%db0" : : "r" (value) );
+ asm volatile ( "mov %0, %%db0" : : "r" (value) );
break;
case 1:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( p == current )
- __asm__ ( "mov %0, %%db1" : : "r" (value) );
+ asm volatile ( "mov %0, %%db1" : : "r" (value) );
break;
case 2:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( p == current )
- __asm__ ( "mov %0, %%db2" : : "r" (value) );
+ asm volatile ( "mov %0, %%db2" : : "r" (value) );
break;
case 3:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( p == current )
- __asm__ ( "mov %0, %%db3" : : "r" (value) );
+ asm volatile ( "mov %0, %%db3" : : "r" (value) );
break;
case 6:
/*
value &= 0xffffefff; /* reserved bits => 0 */
value |= 0xffff0ff0; /* reserved bits => 1 */
if ( p == current )
- __asm__ ( "mov %0, %%db6" : : "r" (value) );
+ asm volatile ( "mov %0, %%db6" : : "r" (value) );
break;
case 7:
/*
if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
}
if ( p == current )
- __asm__ ( "mov %0, %%db7" : : "r" (value) );
+ asm volatile ( "mov %0, %%db7" : : "r" (value) );
break;
default:
return -EINVAL;
{
unsigned int cpu, tr;
- asm ( "str %0" : "=r" (tr) );
+ asm volatile ( "str %0" : "=r" (tr) );
cpu = ((tr >> 3) - __FIRST_TSS_ENTRY) >> 2;
watchdog_disable();
if ( is_pv_32bit_vcpu(v) )
return;
v->arch.flags ^= TF_kernel_mode;
- __asm__ __volatile__ ( "swapgs" );
+ asm volatile ( "swapgs" );
update_cr3(v);
#ifdef USER_MAPPINGS_ARE_GLOBAL
/* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
- __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
+ asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
#else
write_ptbase(v);
#endif
switch ( (_dst).bytes ) \
{ \
case 2: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","4","2") \
_op"w %"_wx"3,%1; " \
_POST_EFLAGS("0","4","2") \
"m" (_eflags), "m" ((_dst).val) ); \
break; \
case 4: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","4","2") \
_op"l %"_lx"3,%1; " \
_POST_EFLAGS("0","4","2") \
switch ( (_dst).bytes ) \
{ \
case 1: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","4","2") \
_op"b %"_bx"3,%1; " \
_POST_EFLAGS("0","4","2") \
switch ( (_dst).bytes ) \
{ \
case 1: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","3","2") \
_op"b %1; " \
_POST_EFLAGS("0","3","2") \
: "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \
break; \
case 2: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","3","2") \
_op"w %1; " \
_POST_EFLAGS("0","3","2") \
: "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) ); \
break; \
case 4: \
- __asm__ __volatile__ ( \
+ asm volatile ( \
_PRE_EFLAGS("0","3","2") \
_op"l %1; " \
_POST_EFLAGS("0","3","2") \
/* Emulate an instruction with quadword operands (x86/64 only). */
#if defined(__x86_64__)
#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \
-do{ __asm__ __volatile__ ( \
+do{ asm volatile ( \
_PRE_EFLAGS("0","4","2") \
_op"q %"_qx"3,%1; " \
_POST_EFLAGS("0","4","2") \
"m" (_eflags), "m" ((_dst).val) ); \
} while (0)
#define __emulate_1op_8byte(_op, _dst, _eflags) \
-do{ __asm__ __volatile__ ( \
+do{ asm volatile ( \
_PRE_EFLAGS("0","3","2") \
_op"q %1; " \
_POST_EFLAGS("0","3","2") \
/* Given byte has even parity (even number of 1s)? */
static int even_parity(uint8_t v)
{
- __asm__ ( "test %%al,%%al; setp %%al"
+ asm ( "test %%al,%%al; setp %%al"
: "=a" (v) : "0" (v) );
return v;
}
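The setp trick reads the parity flag that test computes over AL; the same predicate in plain C (illustrative only):

#include <stdint.h>

static inline int even_parity_sketch(uint8_t v)
{
    v ^= v >> 4;          /* fold so that bit 0 ends up holding ...      */
    v ^= v >> 2;
    v ^= v >> 1;          /* ... the XOR of all eight original bits      */
    return !(v & 1);      /* even number of 1s <=> XOR of the bits is 0  */
}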
break;
case 4:
#ifdef __x86_64__
- __asm__ ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
+ asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
break;
case 8:
#endif
- __asm__ ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
+ asm ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
break;
}
break;
{
int oldbit;
- __asm__(
+ asm volatile (
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr), "m" (ADDR) : "memory");
{
int oldbit;
- __asm__(
+ asm volatile (
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr), "m" (ADDR) : "memory");
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
- return v.u;
+ return v.u;
}
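The two bswapl plus xchgl sequence byte-reverses a 64-bit value by reversing each 32-bit half and exchanging the halves; a sketch using the GCC builtin (assuming __builtin_bswap32 is available):

#include <stdint.h>

static inline uint64_t swab64_sketch(uint64_t x)
{
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);
    /* Reverse each half, then swap the halves. */
    return ((uint64_t)__builtin_bswap32(lo) << 32) | __builtin_bswap32(hi);
}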
/* Do not define swab16. Gcc is smart enough to recognize "C" version and
#if BITS_PER_LONG == 64
-# define do_div(n,base) ({ \
- uint32_t __base = (base); \
- uint32_t __rem; \
- __rem = ((uint64_t)(n)) % __base; \
- (n) = ((uint64_t)(n)) / __base; \
- __rem; \
- })
+#define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+})
#else
* This ends up being the most efficient "calling
* convention" on x86.
*/
-#define do_div(n,base) ({ \
- unsigned long __upper, __low, __high, __mod, __base; \
- __base = (base); \
- asm("":"=a" (__low), "=d" (__high):"A" (n)); \
- __upper = __high; \
- if (__high) { \
- __upper = __high % (__base); \
- __high = __high / (__base); \
- } \
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
- asm("":"=A" (n):"a" (__low),"d" (__high)); \
- __mod; \
+#define do_div(n,base) ({ \
+ unsigned long __upper, __low, __high, __mod, __base; \
+ __base = (base); \
+ asm ( "" : "=a" (__low), "=d" (__high) : "A" (n) ); \
+ __upper = __high; \
+ if ( __high ) \
+ { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm ( "divl %2" \
+ : "=a" (__low), "=d" (__mod) \
+ : "rm" (__base), "0" (__low), "1" (__upper) ); \
+ asm ( "" : "=A" (n) : "a" (__low), "d" (__high) ); \
+ __mod; \
})
#endif
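Either variant is used the same way: do_div() divides a 64-bit value in place by a 32-bit base and evaluates to the 32-bit remainder. A usage sketch built on the macro above (hypothetical helper, not part of this patch):

#include <stdint.h>

static inline uint32_t split_ns_sketch(uint64_t ns, uint64_t *sec)
{
    uint32_t rem;

    *sec = ns;
    rem = do_div(*sec, 1000000000u);  /* *sec <- whole seconds           */
    return rem;                       /* remaining nanoseconds (< 10^9)  */
}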
* instruction pointer ("program counter").
*/
#ifdef __x86_64__
-#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+#define current_text_addr() ({ \
+ void *pc; \
+ asm ( "leaq 1f(%%rip),%0\n1:" : "=r" (pc) ); \
+ pc; \
+})
#else
-#define current_text_addr() \
- ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+#define current_text_addr() ({ \
+ void *pc; \
+ asm ( "movl $1f,%0\n1:" : "=g" (pc) ); \
+ pc; \
+})
#endif
struct cpuinfo_x86 {
* resulting in stale register contents being returned.
*/
#define cpuid(_op,_eax,_ebx,_ecx,_edx) \
- __asm__("cpuid" \
- : "=a" (*(int *)(_eax)), \
- "=b" (*(int *)(_ebx)), \
- "=c" (*(int *)(_ecx)), \
- "=d" (*(int *)(_edx)) \
- : "0" (_op), "2" (0))
+ asm ( "cpuid" \
+ : "=a" (*(int *)(_eax)), \
+ "=b" (*(int *)(_ebx)), \
+ "=c" (*(int *)(_ecx)), \
+ "=d" (*(int *)(_edx)) \
+ : "0" (_op), "2" (0) )
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(
unsigned int *ecx,
unsigned int *edx)
{
- __asm__("cpuid"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (op), "c" (count));
+ asm ( "cpuid"
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (op), "c" (count) );
}
/*
{
unsigned int eax;
- __asm__("cpuid"
- : "=a" (eax)
- : "0" (op)
- : "bx", "cx", "dx");
+ asm ( "cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx" );
return eax;
}
+
static always_inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx;
- __asm__("cpuid"
- : "=a" (eax), "=b" (ebx)
- : "0" (op)
- : "cx", "dx" );
+ asm ( "cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
return ebx;
}
+
static always_inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ecx;
- __asm__("cpuid"
- : "=a" (eax), "=c" (ecx)
- : "0" (op)
- : "bx", "dx" );
+ asm ( "cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
return ecx;
}
+
static always_inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, edx;
- __asm__("cpuid"
- : "=a" (eax), "=d" (edx)
- : "0" (op)
- : "bx", "cx");
+ asm ( "cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx" );
return edx;
}
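And a usage sketch for the single-register helpers (hypothetical; relies on the conventional CPUID.1:EDX bit 25 = SSE encoding):

static inline int cpu_has_sse_sketch(void)
{
    return (cpuid_edx(1) >> 25) & 1;  /* CPUID leaf 1, EDX bit 25 */
}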
-
-
static inline unsigned long read_cr0(void)
{
- unsigned long __cr0;
- __asm__("mov %%cr0,%0\n\t" :"=r" (__cr0));
- return __cr0;
+ unsigned long cr0;
+ asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) );
+ return cr0;
}
static inline void write_cr0(unsigned long val)
{
- __asm__("mov %0,%%cr0": :"r" ((unsigned long)val));
+ asm volatile ( "mov %0,%%cr0" : : "r" ((unsigned long)val) );
}
static inline unsigned long read_cr2(void)
{
- unsigned long __cr2;
- __asm__("mov %%cr2,%0\n\t" :"=r" (__cr2));
- return __cr2;
+ unsigned long cr2;
+ asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
+ return cr2;
}
static inline unsigned long read_cr4(void)
{
- unsigned long __cr4;
- __asm__("mov %%cr4,%0\n\t" :"=r" (__cr4));
- return __cr4;
+ unsigned long cr4;
+ asm volatile ( "mov %%cr4,%0\n\t" : "=r" (cr4) );
+ return cr4;
}
static inline void write_cr4(unsigned long val)
{
- __asm__("mov %0,%%cr4": :"r" ((unsigned long)val));
+ asm volatile ( "mov %0,%%cr4" : : "r" ((unsigned long)val) );
}
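A sketch of how the accessors compose into a read-modify-write (hypothetical helper, not part of this patch):

static inline void set_in_cr4_sketch(unsigned long mask)
{
    write_cr4(read_cr4() | mask);     /* e.g. turning on a CR4 feature bit */
}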
-
/* Clear and set 'TS' bit respectively */
static inline void clts(void)
{
- __asm__ __volatile__ ("clts");
+ asm volatile ( "clts" );
}
static inline void stts(void)
write_cr0(X86_CR0_TS|read_cr0());
}
-
/*
* Save the cr4 feature set we're using (ie
* Pentium 4MB enable and PPro Global page
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
} while (0)
/* Stop speculative execution */
static inline void sync_core(void)
{
int tmp;
- asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+ asm volatile (
+ "cpuid"
+ : "=a" (tmp)
+ : "0" (1)
+ : "ebx","ecx","edx","memory" );
}
static always_inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
+ unsigned long edx)
{
- /* "monitor %eax,%ecx,%edx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc8;"
- : :"a" (eax), "c" (ecx), "d"(edx));
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile (
+ ".byte 0x0f,0x01,0xc8;"
+ : : "a" (eax), "c" (ecx), "d"(edx) );
}
static always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
- /* "mwait %eax,%ecx;" */
- asm volatile(
- ".byte 0x0f,0x01,0xc9;"
- : :"a" (eax), "c" (ecx));
+ /* "mwait %eax,%ecx;" */
+ asm volatile (
+ ".byte 0x0f,0x01,0xc9;"
+ : : "a" (eax), "c" (ecx) );
}
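A usage sketch for the monitor/mwait pair (hypothetical wait loop; assumes the waker writes the monitored flag, which wakes the mwait):

static void wait_for_flag_sketch(volatile unsigned int *flag)
{
    while ( !*flag )
    {
        __monitor((const void *)flag, 0, 0);  /* arm the monitor on this line    */
        if ( *flag )                          /* re-check to close the race      */
            break;
        __mwait(0, 0);                        /* sleep until the line is written */
    }
}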
#define IOBMP_BYTES 8192
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static always_inline void rep_nop(void)
{
- __asm__ __volatile__ ( "rep;nop" : : : "memory" );
+ asm volatile ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
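The canonical busy-wait shape cpu_relax() is intended for (illustrative only):

static inline void spin_until_set_sketch(volatile unsigned long *p)
{
    while ( *p == 0 )
        cpu_relax();                  /* PAUSE eases the spin for the sibling thread */
}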
#define ARCH_HAS_PREFETCH
extern always_inline void prefetch(const void *x)
{
- __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+ asm volatile ( "prefetchnta (%0)" : : "r"(x) );
}
#elif CONFIG_X86_USE_3DNOW
extern always_inline void prefetch(const void *x)
{
- __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+ asm volatile ( "prefetch (%0)" : : "r"(x) );
}
extern always_inline void prefetchw(const void *x)
{
- __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+ asm volatile ( "prefetchw (%0)" : : "r"(x) );
}
#define spin_lock_prefetch(x) prefetchw(x)